[XEN] Small clean up.
author: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 25 Sep 2006 16:45:28 +0000 (17:45 +0100)
committer: kfraser@localhost.localdomain <kfraser@localhost.localdomain>
Mon, 25 Sep 2006 16:45:28 +0000 (17:45 +0100)
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/common/schedule.c

index 0b5ab9501ca8a5d0ab909af2a9f8d3cb26c3e03c..b7ed65aa4c65a8a6f4d66a5458c056523abf5c52 100644 (file)
@@ -41,7 +41,6 @@ string_param("sched", opt_sched);
 static unsigned int opt_dom0_vcpus_pin;
 boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);
 
-
 #define TIME_SLOP      (s32)MICROSECS(50)     /* allow time to slip a bit */
 
 /* Various timer handlers. */
@@ -104,36 +103,30 @@ void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate)
 
 int sched_init_vcpu(struct vcpu *v, unsigned int processor) 
 {
-    const struct domain * const d = v->domain;
+    struct domain *d = v->domain;
 
-    /* Initialize processor and affinity settings. */
+    /*
+     * Initialize processor and affinity settings. The idler, and potentially
+     * domain-0 VCPUs, are pinned onto their respective physical CPUs.
+     */
     v->processor = processor;
-
-    if ( is_idle_domain(d) || (d->domain_id == 0 && opt_dom0_vcpus_pin) )
-    {
-        /*
-         * The idler and potentially dom0 VCPUs are pinned onto their
-         * respective physical CPUs.
-         */
+    if ( is_idle_domain(d) || ((d->domain_id == 0) && opt_dom0_vcpus_pin) )
         v->cpu_affinity = cpumask_of_cpu(processor);
-
-        /* The idle VCPUs takes over their CPUs on creation... */
-        if ( is_idle_domain(d) )
-        {
-            per_cpu(schedule_data, v->processor).curr = v;
-            per_cpu(schedule_data, v->processor).idle = v;
-            set_bit(_VCPUF_running, &v->vcpu_flags);
-        }
-    }
     else
-    {
         v->cpu_affinity = CPU_MASK_ALL;
-    }
 
     /* Initialise the per-domain timers. */
     init_timer(&v->timer, vcpu_timer_fn, v, v->processor);
     init_timer(&v->poll_timer, poll_timer_fn, v, v->processor);
 
+    /* Idle VCPUs are scheduled immediately. */
+    if ( is_idle_domain(d) )
+    {
+        per_cpu(schedule_data, v->processor).curr = v;
+        per_cpu(schedule_data, v->processor).idle = v;
+        set_bit(_VCPUF_running, &v->vcpu_flags);
+    }
+
     TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
 
     return SCHED_OP(init_vcpu, v);
@@ -236,7 +229,7 @@ int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
     cpumask_t online_affinity;
     unsigned long flags;
 
-    if ( v->domain->domain_id == 0 && opt_dom0_vcpus_pin )
+    if ( (v->domain->domain_id == 0) && opt_dom0_vcpus_pin )
         return -EINVAL;
 
     cpus_and(online_affinity, *affinity, cpu_online_map);